Make in_tpa SMP-safe by removing the temporary global flag and instead
passing it as an explicit BOOLEAN parameter to vcpu_translate(), updating
all callers and the prototype accordingly.
Signed-off-by: Tristan Gingold <tristan.gingold@bull.net>
return;
}
- fault = vcpu_translate(current,address,is_data,&pteval,&itir,&iha);
+ fault = vcpu_translate(current,address,is_data,0,&pteval,&itir,&iha);
if (fault == IA64_NO_FAULT) {
pteval = translate_domain_pte(pteval,address,itir);
vcpu_itc_no_srlz(current,is_data?2:1,address,pteval,-1UL,(itir>>2)&0x3f);
*
*/
-#if 1
-// TEMPORARY PATCH for match_dtlb uses this, can be removed later
-// FIXME SMP
-int in_tpa = 0;
-#endif
-
#include <linux/sched.h>
#include <public/arch-ia64.h>
#include <asm/ia64_int.h>
int warn_region0_address = 0; // FIXME later: tie to a boot parameter?
-IA64FAULT vcpu_translate(VCPU *vcpu, UINT64 address, BOOLEAN is_data, UINT64 *pteval, UINT64 *itir, UINT64 *iha)
+IA64FAULT vcpu_translate(VCPU *vcpu, UINT64 address, BOOLEAN is_data, BOOLEAN in_tpa, UINT64 *pteval, UINT64 *itir, UINT64 *iha)
{
unsigned long region = address >> 61;
unsigned long pta, pte, rid, rr;
UINT64 pteval, itir, mask, iha;
IA64FAULT fault;
- in_tpa = 1;
- fault = vcpu_translate(vcpu, vadr, 1, &pteval, &itir, &iha);
- in_tpa = 0;
+ fault = vcpu_translate(vcpu, vadr, TRUE, TRUE, &pteval, &itir, &iha);
if (fault == IA64_NO_FAULT)
{
mask = itir_mask(itir);
extern IA64FAULT vcpu_ptc_ga(VCPU *vcpu, UINT64 vadr, UINT64 addr_range);
extern IA64FAULT vcpu_ptr_d(VCPU *vcpu,UINT64 vadr, UINT64 addr_range);
extern IA64FAULT vcpu_ptr_i(VCPU *vcpu,UINT64 vadr, UINT64 addr_range);
-extern IA64FAULT vcpu_translate(VCPU *vcpu, UINT64 address, BOOLEAN is_data, UINT64 *pteval, UINT64 *itir, UINT64 *iha);
+extern IA64FAULT vcpu_translate(VCPU *vcpu, UINT64 address,
+ BOOLEAN is_data, BOOLEAN in_tpa,
+ UINT64 *pteval, UINT64 *itir, UINT64 *iha);
extern IA64FAULT vcpu_tpa(VCPU *vcpu, UINT64 vadr, UINT64 *padr);
extern IA64FAULT vcpu_force_data_miss(VCPU *vcpu, UINT64 ifa);
extern IA64FAULT vcpu_fc(VCPU *vcpu, UINT64 vadr);